https://www.larousse.fr/dictionnaires/francais/inf%C3%A9rence/42907
inférence : Opération par laquelle on passe d'une assertion considérée comme vraie à une autre assertion au moyen d'un système de règles qui rend cette deuxième assertion également vraie.
Inférence statistique : Ensemble des méthodes permettant de formuler en termes probabilistes un jugement sur une population à partir des résultats observés sur un échantillon extrait au hasard de cette population.
Initialisation du document¶
In [1]:
# Environment configuration — must be set before TensorFlow is imported.
import os
# import json
# @param ["tensorflow", "jax", "torch"]
# Restrict TensorFlow to the first GPU only.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Keras 3 backend selection (tensorflow / jax / torch).
os.environ['KERAS_BACKEND'] = 'tensorflow'
# Expose XLA devices so TensorFlow can JIT-compile graphs.
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
# os.environ['TF_XLA_FLAGS']='--tf_xla_auto_jit=1,--tf_xla_always_defer_compilation=true'
# os.environ['XLA_FLAGS']='--xla_backend_optimization_level=0,--xla_gpu_autotune_level=4,--xla_gpu_disable_ptxas_optimizations=true,--xla_gpu_use_cudnn_batchnorm_level=2'
# os.environ['TF_GPU_ALLOCATOR']='cuda_malloc_async'
# Silence TensorFlow's C++ logging entirely (level 5 = nothing).
os.environ['TF_CPP_MIN_LOG_LEVEL']='5'
Import libraries¶
In [2]:
import numpy as np, pandas as pd, seaborn as sns, warnings, os, sys, pickle, time
from matplotlib import pyplot as plt
from datetime import datetime as dt
import matplotlib.font_manager as fm
from matplotlib.lines import Line2D
warnings.filterwarnings(action="ignore")
# Pick the matplotlib style name that matches the installed seaborn:
# matplotlib renamed "seaborn-darkgrid" to "seaborn-v0_8-darkgrid" when the
# legacy seaborn styles were deprecated.  Compare (major, minor) instead of
# the minor component alone — the original `minor > 8` test would wrongly
# select the removed legacy name for any seaborn >= 1.0 release.
if tuple(int(p) for p in str(sns.__version__).split('.')[:2]) > (0, 8):
    plt.style.use('seaborn-v0_8-darkgrid')
else:
    plt.style.use('seaborn-darkgrid')
sns.set(font_scale=2)
In [3]:
import tensorflow as tf
from tensorflow.keras.datasets import mnist,fashion_mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Reshape, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from sklearn.preprocessing import label_binarize
from sklearn.metrics import make_scorer, confusion_matrix, roc_curve, auc, accuracy_score, log_loss, hamming_loss, \
precision_score, recall_score, f1_score, fbeta_score, jaccard_score, \
precision_recall_curve, average_precision_score, balanced_accuracy_score, \
classification_report,roc_auc_score
tf.get_logger().setLevel('ERROR')# Suppress TensorFlow's Python-side log messages below ERROR
In [4]:
# Report the runtime versions of the main libraries used by this notebook,
# plus TensorFlow's CUDA / GPU / XLA build flags.
version_info = (
    tf.__version__,
    tf.test.is_built_with_cuda(),
    tf.test.is_built_with_gpu_support(),
    tf.test.is_built_with_xla(),
    tf.keras.__version__,
    pd.__version__,
    np.__version__,
)
print("Tensorflow\t : %s\tCUDA %s\tGPU %s\tXLA %s\nKeras\t\t : %s\nPandas\t\t : %s\nNumPy\t\t : %s"
      % version_info)
Tensorflow : 2.17.0 CUDA True GPU True XLA True Keras : 3.6.0 Pandas : 2.2.2 NumPy : 1.26.4
Initialisation des GPUs présents¶
In [ ]:
# Enable XLA devices before enumerating the physical GPUs.
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0 :
# Grow GPU memory on demand instead of reserving all of it up front.
for i, gpu in enumerate(physical_devices):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
# NOTE(review): the notebook export lost indentation here — it is unclear
# whether the strategy is created only when a GPU is present; confirm
# against the original notebook before refactoring.
strategy = tf.distribute.experimental.CentralStorageStrategy()
print('Le système est initialisé avec {0:d} GPUs'.format(strategy.num_replicas_in_sync))
Outils du document¶
In [6]:
# Color palette (hex RGB strings) used by every plot in this notebook;
# rows are roughly grouped from saturated tones down to dark shades.
palette = [
"#030aa7", "#e50000", "#d8863b", "#005f6a", "#6b7c85", "#751973", "#d1e5f0", "#fddbc7",
"#ffffcb", "#12e193", "#d8dcd6", "#ffdaf0", "#dfc5fe", "#f5054f", "#a0450e",
"#0339f8", "#f4320c", "#fec615", "#017a79", "#85a3b2", "#fe2f4a", "#a00498", "#b04e0f",
"#0165fc", "#ff724c", "#fddc5c", "#11875d", "#89a0b0", "#fe828c", "#cb00f5", "#b75203",
"#0485d1", "#ff7855", "#fbeeac", "#0cb577", "#95a3a6", "#ffb7ce", "#c071fe", "#ca6b02",
"#92c5de", "#f4a582", "#fef69e", "#18d17b", "#c5c9c7", "#ffcfdc", "#caa0ff", "#cb7723",
"#d1e5f0", "#fddbc7", "#ffffcb", "#12e193", "#d8dcd6", "#ffdaf0", "#dfc5fe", "#d8863b",
"#030764", "#be0119", "#dbb40c", "#005249", "#3c4142", "#cb0162", "#5d1451", "#653700",
"#040348", "#67001f", "#b27a01", "#002d04", "#000000", "#a0025c", "#490648", "#3c0008"
]
In [17]:
# Project layout: images and model checkpoints live in per-project
# sub-directories under the current working directory.
nom_projet = "10.Les Filtres Reponses VGG19"
repertoireProjet = os.getcwd()
# os.path.join is platform-safe and clearer than manual '/' concatenation.
repertoireEnregistrement = os.path.join(repertoireProjet, nom_projet, 'repertoire.images')
repertoireSauvegardes = os.path.join(repertoireProjet, nom_projet, 'repertoire.sauvegardes')
def controleExistenceRepertoire(directory, create_if_needed=True):
    """Ensure *directory* exists.

    Returns True when the directory already exists, False when it did not
    (after creating it, unless create_if_needed is False).  Raises Exception
    when the path exists but is a regular file.

    Fix: the original had an unreachable ``return False`` directly after the
    ``raise`` statement; it has been removed.
    """
    if os.path.exists(directory):
        if not os.path.isdir(directory):
            # A file squatting on the expected directory name is fatal.
            raise Exception("Trouvé le nom "+directory+" mais c'est un fichier, pas un répertoire")
        return True
    if create_if_needed:
        # exist_ok guards against a concurrent creation race.
        os.makedirs(directory, exist_ok=True)
    return False
# Create the image and checkpoint output directories if they are missing.
controleExistenceRepertoire(repertoireEnregistrement)
controleExistenceRepertoire(repertoireSauvegardes)
def sauvegarderImage(fichier):
    """Save the current matplotlib figure to the image directory.

    Call this just before plt.show(); the filename is suffixed with a
    timestamp so successive runs never overwrite each other.
    """
    horodatage = dt.now().strftime('%Y_%m_%d_%H.%M.%S')
    destination = os.path.join(repertoireEnregistrement, fichier + f"--{horodatage}.png")
    plt.savefig(destination, dpi=600, bbox_inches='tight')
def sauvegarderModelPoids(model, fichierPoids, repertoireSauvegardes=repertoireSauvegardes):
    """Save the Keras model's weights under *repertoireSauvegardes*.

    Does nothing when fichierPoids is None.
    """
    if fichierPoids is not None:  # identity test instead of `!= None`
        controleExistenceRepertoire(repertoireSauvegardes)
        # NOTE(review): Keras 3 requires save_weights() filenames to end in
        # '.weights.h5'; the '.keras' suffix used here likely fails at
        # runtime — confirm and change together with lectureModelPoids.
        nomFichier = os.path.join(repertoireSauvegardes, '{}.keras'.format(fichierPoids))
        model.save_weights(nomFichier)
def sauvegarderModel(model, fichier, repertoireSauvegardes=repertoireSauvegardes):
    """Save the full Keras model (architecture + weights) as a .keras file.

    Does nothing when fichier is None.
    """
    if fichier is not None:  # identity test instead of `!= None`
        controleExistenceRepertoire(repertoireSauvegardes)
        nomFichier = os.path.join(repertoireSauvegardes, '{}.keras'.format(fichier))
        model.save(nomFichier)
def lectureModelPoids(model, fichier, repertoireSauvegardes=repertoireSauvegardes):
    """Load saved weights into *model* when the weights file exists.

    Returns True when the file was found and loaded, False otherwise.
    """
    nomFichier = os.path.join(repertoireSauvegardes, '{}.keras'.format(fichier))
    # os.path.isfile() already implies existence, so the original separate
    # exists() + isfile() pair collapses to a single check.
    if os.path.isfile(nomFichier):
        # NOTE(review): Keras 3 expects load_weights() files to end in
        # '.weights.h5' — confirm this suffix matches what
        # sauvegarderModelPoids actually wrote.
        model.load_weights(nomFichier)
        return True
    return False
def sauvegardeHistorique(model,
                         repertoireSauvegardes,
                         nomSauvegarde='one_hidden_layer_history_batch_size_1'):
    """Persist a training history as a gzip-compressed parquet file.

    Returns the history as a DataFrame with an explicit 'epoch' column.
    """
    history = (pd.DataFrame(model.history)
                 .reset_index()
                 .rename(columns={'index': 'epoch'}))
    history.to_parquet(os.path.join(repertoireSauvegardes, f'{nomSauvegarde}.gzip'),
                       compression='gzip', engine='pyarrow')
    return history
def afficheHistoriqueEntrainement(history, palette, nom='afficheHistoriqueEntrainement'):
    """Plot training/validation accuracy and loss curves side by side.

    history -- DataFrame with 'epoch', 'accuracy', 'val_accuracy', 'loss'
               and 'val_loss' columns (as produced by sauvegardeHistorique).
    palette -- color sequence; palette[0] is used for the training curve,
               palette[1] for the validation curve.
    nom     -- base filename for the saved figure.
    """
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(48, 16))
    markersize = 8
    linewidth = 2
    # (metric, target axis index, color): the four copy-pasted sns.lineplot
    # calls of the original collapsed into one data-driven loop.
    courbes = [
        ('accuracy', 0, palette[0]),
        ('val_accuracy', 0, palette[1]),
        ('loss', 1, palette[0]),
        ('val_loss', 1, palette[1]),
    ]
    for metrique, axe, couleur in courbes:
        sns.lineplot(x='epoch',
                     y=metrique,
                     data=history,
                     ax=ax[axe],
                     label=metrique,
                     err_style=None,
                     marker='o',
                     markersize=markersize,
                     linewidth=linewidth,
                     color=couleur)
    sauvegarderImage(nom)
def afficheMatriceConfusion(observations, predictions, dictLabels):
    """Plot a confusion-matrix heatmap of observed vs. predicted labels."""
    plt.figure(figsize=(8, 8))
    sns.set(font_scale=1.5)
    # pd.crosstab(observations, predictions) puts the observations on the
    # rows (heatmap y-axis) and the predictions on the columns (x-axis).
    sns.heatmap(pd.crosstab(observations, predictions),
                fmt='.0f',
                linewidths=0.3,
                square=True,
                cmap=plt.cm.Blues,
                linecolor='white',
                annot=True,
                cbar=False,
                xticklabels=dictLabels.values(),
                yticklabels=dictLabels.values())
    # Bug fix: the axis labels were swapped — the rows (y-axis) are the
    # observations and the columns (x-axis) the predictions.
    plt.xlabel('Prédictions', fontsize=18)
    plt.ylabel('Observations', fontsize=18)
    sauvegarderImage('afficheMatriceConfusion')
def executeApprentissageChoixClassifieurs(model,
X_test,
y_test,
label_dict,
couleurs,
nom_essai = 'initial'
):
"""Evaluate a fitted multi-class Keras classifier on a test set.

Computes global and per-class (one-vs-rest) metrics — ROC AUC, average
precision, F1/F2/F0.5, accuracy, precision/recall, Jaccard, log-loss,
Hamming loss and confusion-matrix counts — draws the ROC and
precision-recall curve figures, and returns everything as a DataFrame with
one row per class plus a 'global' row, tagged with *nom_essai*.

NOTE(review): the body reads a module-level ``dictLabels`` (not the
``label_dict`` parameter) and the ``couleurs`` parameter is never used —
both look like leftovers from an earlier version; confirm against the
original notebook.
"""
# Nested helper: draws the per-class ROC curves, then the precision-recall
# curves with iso-F1 reference lines, saving both figures.
def afficheCourbes(vraisPositifs,fauxPositifs,aucROCt,precisions,sensibilites,avgPrecRec,nbClasses,lw,label_dict):
plt.figure(figsize=(24, 24));
for i, color in zip(range(nbClasses), palette):
plt.plot(fauxPositifs[i], vraisPositifs[i], color=color, lw=lw,
label=' ' + label_dict[i] + ' (AUC = {1:0.8f})'
''.format(i, aucROCt[i]))
# Diagonal = random-classifier reference line.
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('Taux de faux Positifs-(1 - Spécificité) = VN / (FP + VN)',size=18)
plt.ylabel('Taux de vrais positifs-Sensibilité = VP / (VP + FN)',size=18)
plt.title('Courbe ROC (Receiver Operating Caracteristic) -- ',size=20)
plt.legend(loc="lower right"); #, fontsize='large'
sauvegarderImage('Courbe ROC')
plt.figure(figsize=(24,24));
# Gray iso-F1 curves: for a fixed F-score f, precision p and recall r
# satisfy p = f*r / (2r - f).
f_scores = np.linspace(0.2, 0.9, num=8)
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
# Step plots: precision-recall curves are piecewise constant.
for i, color in zip(range(nbClasses), palette):
plt.step(sensibilites[i],
precisions[i],
where='post',
color=color,
lw=lw,
label=f"{label_dict[i]}(APR = {avgPrecRec[label_dict[i]]:0.8f})"
)
plt.fill_between(sensibilites[i], precisions[i], step='post', alpha=0.05)
# plt.plot(fauxPositifs[i], vraisPositifs[i], color=color, lw=lw,
# label=' ' + label_dict[i] + ' (AUC = {1:0.8f})'
# ''.format(i, aucROCt[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('Sensibilité/Rappel(Recall) = VP / (VP + FN)',size=18)
plt.ylabel('Précision = VP / (VP + FP)',size=18)
plt.title('Courbe Précision-Rappel',size=20)
plt.legend(loc="lower right") # , fontsize = 'large'
sauvegarderImage('Courbe Précision-Rappel')
# Metric accumulators, keyed by class label plus the 'global' aggregate.
# NOTE(review): several of these (cvF1, cvAccuracy, the *_micro/_macro
# families, ...) are initialized but never filled in this function.
cvF1, cvF1SD, cvAccuracy, cvAccSD, aucROC, avgPrecRec, accuracy, balanced_accuracy, logloss, hammingloss, precision, sensibilite, \
f1, f2, f05, jaccard, vrais_negatifs, faux_positifs, faux_negatifs, vrais_positifs, total_positifs, aucROCtn = \
dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(),\
dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict()
#
oneloss, precision_micro, precision_macro, precision_weighted, \
sensibilite_macro, sensibilite_micro, sensibilite_weighted, \
f1_micro, f1_macro, f1_weighted,f2_micro,f2_macro,f2_weighted,f05_micro,f05_macro,f05_weighted = \
dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), \
dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict()
fauxPositifs, vraisPositifs, precisions, sensibilites, aucROCt, pr_auc, tauxROC, tauxPR = dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict()
lw = 1
# couleurs = sns.hls_palette(len(classifieursDict.keys()), l=.4, s=.9)
# NOTE(review): uses the global dictLabels, not the label_dict parameter.
nbClasses = len(dictLabels.keys())
listClasses = list(dictLabels.keys())
# One-hot encode the labels for one-vs-rest curves and per-class metrics.
y_testA = label_binarize(y_test, classes=listClasses)
plt.figure(figsize=(18,18))
t1 = time.time()
classifier = model
# y_score = model.predict_proba(X_test)
# y_pred = model.predict(X_test)
# Keras predict() returns class probabilities; argmax gives hard labels.
y_score = model.predict(X_test)
y_pred = np.argmax(y_score, axis=-1)
y_predA = label_binarize(y_pred, classes=listClasses)
# Global (weighted multi-class) metrics.
accuracy['global'] = accuracy_score(y_test, y_pred)
balanced_accuracy['global'] = balanced_accuracy_score(y_test, y_pred)
precision['global'] = precision_score(y_test, y_pred, average='weighted')
sensibilite['global'] = recall_score(y_test, y_pred, average='weighted')
f1['global'] = f1_score(y_test, y_pred, average='weighted')
f2['global'] = fbeta_score(y_test, y_pred, beta=2, average='weighted')
f05['global'] = fbeta_score(y_test, y_pred, beta=0.5, average='weighted')
vrais_negatifs['global'] = 0
faux_positifs ['global'] = 0
faux_negatifs ['global'] = 0
vrais_positifs['global'] = 0
total_positifs['global'] = 0
aucROC['global'] = roc_auc_score(y_test, y_score, multi_class='ovr')
# Per-class one-vs-rest metrics; confusion counts also feed the totals.
for i in range(nbClasses):
fauxPositifs[i], vraisPositifs[i], tauxROC[i] = roc_curve(y_testA[:, i], y_score[:, i])
aucROCt[i] = auc(fauxPositifs[i], vraisPositifs[i])
precisions[i], sensibilites[i], tauxPR[i] = precision_recall_curve(y_testA[:, i], y_score[:, i])
aucROC[label_dict[i]] = aucROCt[i]
avgPrecRec[label_dict[i]] = average_precision_score(y_testA[:, i], y_score[:, i])
accuracy[label_dict[i]] = accuracy_score(y_testA[:, i], y_predA[:, i])
balanced_accuracy[label_dict[i]] = balanced_accuracy_score(y_testA[:, i],y_predA[:, i])
logloss[label_dict[i]] = log_loss(y_testA[:, i], y_predA[:, i])
hammingloss[label_dict[i]] = hamming_loss(y_testA[:, i], y_predA[:, i])
precision[label_dict[i]] = precision_score(y_testA[:, i], y_predA[:, i])
sensibilite[label_dict[i]] = recall_score(y_testA[:, i], y_predA[:, i])
f1[label_dict[i]] = f1_score(y_testA[:, i], y_predA[:, i])
f2[label_dict[i]] = fbeta_score(y_testA[:, i], y_predA[:, i], beta=2)
f05[label_dict[i]] = fbeta_score(y_testA[:, i], y_predA[:, i], beta=0.5)
jaccard[label_dict[i]] = jaccard_score(y_testA[:, i], y_predA[:, i])
# Binary confusion matrix layout: [[TN, FP], [FN, TP]].
vrais_negatifs[label_dict[i]] = confusion_matrix(y_testA[:, i], y_predA[:, i])[0, 0]
faux_positifs[label_dict[i]] = confusion_matrix(y_testA[:, i], y_predA[:, i])[0, 1]
faux_negatifs[label_dict[i]] = confusion_matrix(y_testA[:, i], y_predA[:, i])[1, 0]
vrais_positifs[label_dict[i]] = confusion_matrix(y_testA[:, i], y_predA[:, i])[1, 1]
total_positifs[label_dict[i]] = vrais_positifs[label_dict[i]] + faux_negatifs [label_dict[i]]
vrais_negatifs['global'] += vrais_negatifs[label_dict[i]]
faux_positifs ['global'] += faux_positifs [label_dict[i]]
faux_negatifs ['global'] += faux_negatifs [label_dict[i]]
vrais_positifs['global'] += vrais_positifs[label_dict[i]]
total_positifs['global'] = vrais_positifs['global'] + faux_negatifs ['global']
# Micro-average ROC: pool every (sample, class) decision together.
fauxPositifs["micro"], vraisPositifs["micro"], _ = roc_curve(y_testA.ravel(), y_score.ravel())
aucROCt["micro"] = auc(fauxPositifs["micro"], vraisPositifs["micro"])
# Macro-average ROC: interpolate each class curve on a common FPR grid,
# then average the TPRs.
listFauxPositifs = np.unique(np.concatenate([fauxPositifs[i] for i in range(nbClasses)]))
moyenneVraisPositifs = np.zeros_like(listFauxPositifs)
for i in range(nbClasses):
moyenneVraisPositifs += np.interp(listFauxPositifs, fauxPositifs[i], vraisPositifs[i])
moyenneVraisPositifs /= nbClasses
fauxPositifs["macro"], vraisPositifs["macro"] = listFauxPositifs, moyenneVraisPositifs
aucROCt["macro"] = auc(fauxPositifs["macro"], vraisPositifs["macro"])
# aucROC['global'] = aucROCt["macro"] # (aucROCt["micro"],aucROCt["macro"])
avgPrecRec['global'] = average_precision_score(y_testA.ravel(), y_score.ravel(), average='weighted')
afficheCourbes(vraisPositifs,fauxPositifs,aucROCt,precisions,sensibilites,avgPrecRec,nbClasses,lw,label_dict);
print ("Area under the ROC curve : %0.4f" % aucROC['global'],end='\t--\t')
print('Exécution :'+('%.2fs' % (time.time() - t1)).lstrip('0'))
# Assemble the result table: one row per key ('global' + each class label).
resultats = pd.DataFrame(pd.Series(aucROC), columns=["aucROC"])
resultats["avgPrecRec"] = pd.Series(avgPrecRec)
resultats["f1"] = pd.Series(f1)
resultats["f2"] = pd.Series(f2)
resultats["f05"] = pd.Series(f05)
resultats["accuracy"] = pd.Series(accuracy)
resultats["precision"] = pd.Series(precision)
resultats["sensibilite"] = pd.Series(sensibilite)
resultats["vrais_positifs"] = pd.Series(vrais_positifs)
resultats["vrais_negatifs"] = pd.Series(vrais_negatifs)
resultats["faux_positifs"] = pd.Series(faux_positifs)
resultats["faux_negatifs"] = pd.Series(faux_negatifs)
resultats["total_positifs"] = pd.Series(total_positifs)
resultats.reset_index(inplace=True)
resultats.rename(columns={"index": "Classe"}, inplace=True)
resultats['essai'] = nom_essai
return resultats
def affichePrediction(model, img_path, decode_predictions, preprocess_input, size=(224, 224)):
    """Run the classifier on one image and plot it next to its top classes.

    model -- Keras image classifier.
    img_path -- path of the image file to classify.
    decode_predictions / preprocess_input -- the companion helpers of the
        chosen tf.keras.applications model family.
    size -- model input resolution (height, width).
    Returns a DataFrame with 'Classe' and 'Probabilite' columns.
    """
    img = tf.keras.preprocessing.image.load_img(img_path, target_size=size)
    x = tf.keras.preprocessing.image.img_to_array(img)
    x = np.expand_dims(x, axis=0)  # add the batch dimension
    x = preprocess_input(x)
    preds = model.predict(x)
    # Decode once (the original called decode_predictions twice on the same
    # predictions); each entry is (imagenet_id, class_name, probability).
    decoded = decode_predictions(preds)[0]
    prediction = pd.DataFrame({'Classe': [d[1] for d in decoded],
                               'Probabilite': [d[2] for d in decoded]})
    fig = plt.figure(figsize=(36, 18))
    fig.subplots_adjust(wspace=0.3)
    plt.subplot(1, 2, 1)
    img = plt.imread(img_path)
    plt.axis('off')
    plt.imshow(img)
    plt.subplot(1, 2, 2)
    graph = sns.barplot(
        x='Classe',
        y='Probabilite',
        data=prediction.sort_values('Probabilite', ascending=False),
        palette=palette[1:]
    )
    # Annotate each visible bar with its probability in percent.
    for patche in graph.patches:
        if patche.get_height() > 0:
            graph.text(
                patche.get_x() + 0.4,
                2 * patche.get_height() / 3,
                f'{patche.get_height()*100:0.2f}%',
                color='black',
                rotation='vertical',
                fontsize='large',
                bbox=dict(boxstyle='round', facecolor='white', alpha=0.6),
                verticalalignment='center',
                horizontalalignment='center',
            )
    return prediction
def prep_image_for_display(input_image):
    """Normalize an activation map for display as an 8-bit image.

    The input may have a very wide range of values, so it is centered,
    scaled by its standard deviation (with a small epsilon against division
    by zero), remapped around gray level 128, and clipped to [0, 255].

    Returns a uint8 array with the same shape as *input_image*.
    """
    # Work on a float copy: the original used np.copy(), so in-place float
    # ops on an integer input (e.g. uint8 image data) raised a casting error.
    image = np.array(input_image, dtype='float64')
    image -= image.mean()
    image /= (image.std() + 1e-5)
    image *= 64
    image += 128
    return np.clip(image, 0, 255).astype('uint8')
Lecture des données¶
In [8]:
!ls -al ../images/donnees/jason.jpg
-rw-rw-r-- 1 razvan razvan 380151 févr. 10 2019 ../images/donnees/jason.jpg
In [9]:
# Load the sample image at the VGG19 input resolution and display it.
jason = '../images/donnees/jason.jpg'
img = tf.keras.preprocessing.image.load_img(jason, target_size=(224, 224))
img_tensor = tf.keras.preprocessing.image.img_to_array(img)
# Add the batch dimension: (224, 224, 3) -> (1, 224, 224, 3).
img_tensor = np.expand_dims(img_tensor, axis=0)
# Scale pixel values to [0, 1] for display.
img_tensor /= 255
plt.figure(figsize=(8, 8))
plt.imshow(img_tensor[0])
print(img_tensor.shape)
# Center values to [-0.5, 0.5] before feeding the activation model below.
# NOTE(review): VGG19's own preprocess_input is not applied here — confirm
# this ad-hoc normalization is intentional for the activation visualization.
img_tensor -= 0.5
(1, 224, 224, 3)
Chargement du modèle VGG19¶
In [10]:
# Load VGG19 with ImageNet-pretrained weights (downloaded on first use).
modelVGG19 = tf.keras.applications.VGG19(weights='imagenet')
In [11]:
# Display the layer-by-layer architecture and parameter counts.
modelVGG19.summary()
Model: "vgg19"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ input_layer (InputLayer) │ (None, 224, 224, 3) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block1_conv1 (Conv2D) │ (None, 224, 224, 64) │ 1,792 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block1_conv2 (Conv2D) │ (None, 224, 224, 64) │ 36,928 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block1_pool (MaxPooling2D) │ (None, 112, 112, 64) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block2_conv1 (Conv2D) │ (None, 112, 112, 128) │ 73,856 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block2_conv2 (Conv2D) │ (None, 112, 112, 128) │ 147,584 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block2_pool (MaxPooling2D) │ (None, 56, 56, 128) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block3_conv1 (Conv2D) │ (None, 56, 56, 256) │ 295,168 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block3_conv2 (Conv2D) │ (None, 56, 56, 256) │ 590,080 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block3_conv3 (Conv2D) │ (None, 56, 56, 256) │ 590,080 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block3_conv4 (Conv2D) │ (None, 56, 56, 256) │ 590,080 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block3_pool (MaxPooling2D) │ (None, 28, 28, 256) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block4_conv1 (Conv2D) │ (None, 28, 28, 512) │ 1,180,160 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block4_conv2 (Conv2D) │ (None, 28, 28, 512) │ 
2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block4_conv3 (Conv2D) │ (None, 28, 28, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block4_conv4 (Conv2D) │ (None, 28, 28, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block4_pool (MaxPooling2D) │ (None, 14, 14, 512) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block5_conv1 (Conv2D) │ (None, 14, 14, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block5_conv2 (Conv2D) │ (None, 14, 14, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block5_conv3 (Conv2D) │ (None, 14, 14, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block5_conv4 (Conv2D) │ (None, 14, 14, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block5_pool (MaxPooling2D) │ (None, 7, 7, 512) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ flatten (Flatten) │ (None, 25088) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ fc1 (Dense) │ (None, 4096) │ 102,764,544 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ fc2 (Dense) │ (None, 4096) │ 16,781,312 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ predictions (Dense) │ (None, 1000) │ 4,097,000 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 143,667,240 (548.05 MB)
Trainable params: 143,667,240 (548.05 MB)
Non-trainable params: 0 (0.00 B)
Inférence¶
In [12]:
prediction = affichePrediction(modelVGG19,
'../images/donnees/jason.jpg',
decode_predictions=tf.keras.applications.vgg19.decode_predictions,
preprocess_input=tf.keras.applications.vgg19.preprocess_input,
size=(224, 224))
1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 1s/step
I0000 00:00:1751805551.106714 667469 device_compiler.h:188] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
Couches du modèle¶
In [13]:
# Re-display the architecture to pick out the layer indices used below.
modelVGG19.summary()
Model: "vgg19"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ input_layer (InputLayer) │ (None, 224, 224, 3) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block1_conv1 (Conv2D) │ (None, 224, 224, 64) │ 1,792 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block1_conv2 (Conv2D) │ (None, 224, 224, 64) │ 36,928 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block1_pool (MaxPooling2D) │ (None, 112, 112, 64) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block2_conv1 (Conv2D) │ (None, 112, 112, 128) │ 73,856 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block2_conv2 (Conv2D) │ (None, 112, 112, 128) │ 147,584 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block2_pool (MaxPooling2D) │ (None, 56, 56, 128) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block3_conv1 (Conv2D) │ (None, 56, 56, 256) │ 295,168 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block3_conv2 (Conv2D) │ (None, 56, 56, 256) │ 590,080 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block3_conv3 (Conv2D) │ (None, 56, 56, 256) │ 590,080 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block3_conv4 (Conv2D) │ (None, 56, 56, 256) │ 590,080 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block3_pool (MaxPooling2D) │ (None, 28, 28, 256) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block4_conv1 (Conv2D) │ (None, 28, 28, 512) │ 1,180,160 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block4_conv2 (Conv2D) │ (None, 28, 28, 512) │ 
2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block4_conv3 (Conv2D) │ (None, 28, 28, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block4_conv4 (Conv2D) │ (None, 28, 28, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block4_pool (MaxPooling2D) │ (None, 14, 14, 512) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block5_conv1 (Conv2D) │ (None, 14, 14, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block5_conv2 (Conv2D) │ (None, 14, 14, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block5_conv3 (Conv2D) │ (None, 14, 14, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block5_conv4 (Conv2D) │ (None, 14, 14, 512) │ 2,359,808 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ block5_pool (MaxPooling2D) │ (None, 7, 7, 512) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ flatten (Flatten) │ (None, 25088) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ fc1 (Dense) │ (None, 4096) │ 102,764,544 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ fc2 (Dense) │ (None, 4096) │ 16,781,312 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ predictions (Dense) │ (None, 1000) │ 4,097,000 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 143,667,240 (548.05 MB)
Trainable params: 143,667,240 (548.05 MB)
Non-trainable params: 0 (0.00 B)
In [14]:
# Map layer index -> layer name for the feature-extraction layers only:
# [1:-4] drops the input layer and the flatten + fc1/fc2/predictions head.
dictLayers = {i:layer.name for i,layer in enumerate(modelVGG19.layers[1:-4])}
Modèle pour recouper les images transformées de chaque couche¶
In [15]:
# Get the output of every layer
# ([1:-4] skips the input layer and the flatten/dense classification head,
# the same slice used for dictLayers).
layer_outputs = [layer.output for layer in modelVGG19.layers[1:-4]]
# Creates a model that will return these outputs, given the model input:
activation_model = tf.keras.models.Model(inputs=modelVGG19.input, outputs=layer_outputs)
Images transformées de chaque couche¶
In [16]:
# One forward pass returns a list of activation tensors, one per layer.
img_activations = activation_model.predict(img_tensor)
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 286ms/step
Affichage des images transformées de chaque couche¶
In [24]:
# Display the first 64 feature maps of every convolutional layer and save
# each 8x8 grid as an image.
for i in dictLayers:
    # Select conv layers by name ('blockN_convM') with a substring test
    # rather than the brittle character slice [7:11] the original used.
    if 'conv' in dictLayers[i]:
        print(dictLayers[i])
        plt.figure(figsize=(24, 24))
        for j in range(64):
            image = np.copy(img_activations[i][0, :, :, j])
            image = np.clip(image, 0, 255)
            plt.subplot(8, 8, j + 1)
            plt.title(j + 1, fontsize=14, y=1.03)
            plt.imshow(image, cmap='gray')
            plt.axis('off')
            # Booleans here: the string 'off' is invalid for tick_params
            # since matplotlib 2.2.
            plt.tick_params(labelbottom=False, labelleft=False)
        sauvegarderImage(f'{dictLayers[i]}')
        plt.show()
block1_conv1
block1_conv2
block2_conv1
block2_conv2
block3_conv1
block3_conv2
block3_conv3
block3_conv4
block4_conv1
block4_conv2
block4_conv3
block4_conv4
block5_conv1
block5_conv2
block5_conv3
block5_conv4
In [ ]: